发现用 Spark 写表 join 比 Flink 简单很多，至少 schema 可以省掉。下面是一个例子：

/**
 * Example: join the MySQL tables {@code user} and {@code house} with Spark SQL
 * and write the matched (user_name, house_name) pairs out as CSV.
 *
 * @param args unused command-line arguments
 */
public static void main(String[] args) {
    SparkSession spark = SparkSession.builder().appName("rec").getOrCreate();
    try {
        // Load both tables over JDBC; Spark infers the schema from the database,
        // so no explicit schema definition is needed.
        Dataset<Row> user = readJdbcTable(spark, "user");
        Dataset<Row> house = readJdbcTable(spark, "house");

        // Cache both sides so the join does not re-read from MySQL.
        user.cache();
        house.cache();

        // Register temp views so the tables are addressable from SQL.
        user.createOrReplaceTempView("user");
        house.createOrReplaceTempView("house");

        // Explicit ON clause instead of "inner join ... where" — same result,
        // but the join condition is stated where it belongs.
        Dataset<Row> joined = spark.sql(
            "select user.user_name, house.house_name "
                + "from user inner join house on user.uid = house.uid");

        joined.write().csv("/home/ziroom/house-user");
    } finally {
        // Always release the session/cluster resources, even when the job fails.
        // (Original code leaked the session: stop() was never called.)
        spark.stop();
    }
}

/**
 * Reads a single MySQL table into a Dataset via the Spark JDBC source.
 * Extracted to remove the copy-pasted option blocks for each table.
 *
 * @param spark the active SparkSession
 * @param table the database table name to load
 * @return the table contents as an untyped Dataset
 */
private static Dataset<Row> readJdbcTable(SparkSession spark, String table) {
    // NOTE(review): the URL and credentials are hard-coded placeholders
    // (the original even used two different placeholder URLs) — load them
    // from configuration or a secret store in real code, never commit them.
    // NOTE(review): "com.mysql.jdbc.Driver" is the legacy Connector/J class;
    // newer drivers use "com.mysql.cj.jdbc.Driver" — confirm the driver version.
    return spark.read().format("jdbc")
        .option("driver", "com.mysql.jdbc.Driver")
        .option("url", "jdbc:mysql://*")
        .option("dbtable", table)
        .option("user", "1")
        .option("password", "1")
        .load();
}